# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#            ChangeSet    1.1153.67.19 -> 1.1153.67.20
#   include/asm-ia64/spinlock.h    1.16 -> 1.17
#   include/asm-ia64/uaccess.h     1.10 -> 1.11
#   arch/ia64/ia32/elfcore32.h     1.2  -> 1.3
#   include/asm-ia64/acpi.h        1.7  -> 1.8
#   arch/ia64/scripts/toolchain-flags  1.4 -> 1.5
#                (new)        -> 1.1    include/asm-ia64/intel_intrin.h
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1180
# [irda] IrCOMM mod refcount
#
# o [FEATURE] Finish removing traces of old module refcount stuff
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1181
# [irda] NSC 3839x probe fixes
#
# o [CORRECT] Make NSC 3839x probe and init *really* work
#   The new 3839x code was totally broken.
#   Won't affect code for regular 38108/38338 chips.
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1182
# [irda] irtty cleanup
#
# o [FEATURE] Finish removing traces of old irtty driver
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1183
# [irda] LAP close race
#
# o [CRITICAL] Fix a race condition when closing the LAP:
#   prevent the stack from opening new LSAPs while we are killing them.
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1184
# [irda] connect watchdog fixes
#
# o [CRITICAL] In case of connect watchdog, drop reference to the LAP
# o [CORRECT] Prevent dumping LSAP after connect watchdog
# o [CRITICAL] Prevent dumping TSAP if dumping LSAP failed
# o [CORRECT] Only set connected bit on response if LSAP state is correct
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1185
# [irda] init failure cleanups
#
# o [FEATURE] Don't leak stuff in various failure paths
# o [FEATURE] Properly initialise self->max_header_size in IrIAP
# --------------------------------------------
# 03/09/05  jt@bougret.hpl.hp.com  1.1186
# [irda] Dongle module aliases
#
# o [FEATURE] Add module aliases to dongle drivers
# --------------------------------------------
# 03/09/05  davidm@tiger.hpl.hp.com  1.1153.67.20
# ia64: Finish adding ECC support. Based on patch by Suresh Siddha.
# --------------------------------------------
#
diff -Nru a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
--- a/arch/ia64/ia32/elfcore32.h	Tue Sep  9 01:05:06 2003
+++ b/arch/ia64/ia32/elfcore32.h	Tue Sep  9 01:05:06 2003
@@ -8,6 +8,8 @@
 #ifndef _ELFCORE32_H_
 #define _ELFCORE32_H_
 
+#include <asm/intrinsics.h>
+
 #define USE_ELF_CORE_DUMP 1
 
 /* Override elfcore.h */
@@ -79,8 +81,7 @@
 	pr_reg[11] = regs->r1;				\
 	pr_reg[12] = regs->cr_iip;			\
 	pr_reg[13] = regs->r17 & 0xffff;		\
-	asm volatile ("mov %0=ar.eflag ;;"		\
-		      : "=r"(pr_reg[14]));		\
+	pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG);	\
 	pr_reg[15] = regs->r12;				\
 	pr_reg[16] = (regs->r17 >> 16) & 0xffff;
diff -Nru a/arch/ia64/scripts/toolchain-flags b/arch/ia64/scripts/toolchain-flags
--- a/arch/ia64/scripts/toolchain-flags	Tue Sep  9 01:05:06 2003
+++ b/arch/ia64/scripts/toolchain-flags	Tue Sep  9 01:05:06 2003
@@ -20,7 +20,7 @@
 EOF
 fi
 
-if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored'
+if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep __model__ | grep -q attrib
 then
 	CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
 fi
diff -Nru a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
--- a/include/asm-ia64/acpi.h	Tue Sep  9 01:05:06 2003
+++ b/include/asm-ia64/acpi.h	Tue Sep  9 01:05:06 2003
@@ -54,47 +54,35 @@
 #define ACPI_ENABLE_IRQS()	local_irq_enable()
 #define ACPI_FLUSH_CPU_CACHE()
 
-#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)			\
-	do {							\
-		__asm__ volatile ("1:  ld4      r29=[%1]\n"	\
-			";;\n"					\
-			"mov	ar.ccv=r29\n"			\
-			"mov	r2=r29\n"			\
-			"shr.u	r30=r29,1\n"			\
-			"and	r29=-4,r29\n"			\
-			";;\n"					\
-			"add	r29=2,r29\n"			\
-			"and	r30=1,r30\n"			\
-			";;\n"					\
-			"add	r29=r29,r30\n"			\
-			";;\n"					\
-			"cmpxchg4.acq	r30=[%1],r29,ar.ccv\n"	\
-			";;\n"					\
-			"cmp.eq	p6,p7=r2,r30\n"			\
-			"(p7) br.dpnt.few 1b\n"			\
-			"cmp.gt	p8,p9=3,r29\n"			\
-			";;\n"					\
-			"(p8) mov %0=-1\n"			\
-			"(p9) mov %0=r0\n"			\
-			:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
-	} while (0)
+static inline int
+acpi_acquire_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
 
-#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)			\
-	do {							\
-		__asm__ volatile ("1:  ld4      r29=[%1]\n"	\
-			";;\n"					\
-			"mov	ar.ccv=r29\n"			\
-			"mov	r2=r29\n"			\
-			"and	r29=-4,r29\n"			\
-			";;\n"					\
-			"cmpxchg4.acq	r30=[%1],r29,ar.ccv\n"	\
-			";;\n"					\
-			"cmp.eq	p6,p7=r2,r30\n"			\
-			"(p7) br.dpnt.few 1b\n"			\
-			"and	%0=1,r2\n"			\
-			";;\n"					\
-			:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
-	} while (0)
+static inline int
+acpi_release_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)			\
+	((Acq) = acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)			\
+	((Acq) = acpi_release_global_lock((unsigned int *) GLptr))
 
 const char *acpi_get_sysname (void);
 int acpi_request_vector (u32 int_type);
diff -Nru a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
--- /dev/null	Wed Dec 31 16:00:00 1969
+++ b/include/asm-ia64/intel_intrin.h	Tue Sep  9 01:05:06 2003
@@ -0,0 +1,254 @@
+#ifndef _ASM_IA64_INTEL_INTRIN_H
+#define _ASM_IA64_INTEL_INTRIN_H
+/*
+ * Intel Compiler Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima
+ * Copyright (C) 2002,2003 Suresh Siddha
+ *
+ */
+#include <asm/types.h>
+
+void __lfetch(int lfhint, void *y);
+void __lfetch_excl(int lfhint, void *y);
+void __lfetch_fault(int lfhint, void *y);
+void __lfetch_fault_excl(int lfhint, void *y);
+
+/* In the following, whichFloatReg should be an integer from 0-127 */
+void __ldfs(const int whichFloatReg, void *src);
+void __ldfd(const int whichFloatReg, void *src);
+void __ldfe(const int whichFloatReg, void *src);
+void __ldf8(const int whichFloatReg, void *src);
+void __ldf_fill(const int whichFloatReg, void *src);
+void __stfs(void *dst, const int whichFloatReg);
+void __stfd(void *dst, const int whichFloatReg);
+void __stfe(void *dst, const int whichFloatReg);
+void __stf8(void *dst, const int whichFloatReg);
+void __stf_spill(void *dst, const int whichFloatReg);
+
+void __st1_rel(void *dst, const __s8 value);
+void __st2_rel(void *dst, const __s16 value);
+void __st4_rel(void *dst, const __s32 value);
+void __st8_rel(void *dst, const __s64 value);
+__u8 __ld1_acq(void *src);
+__u16 __ld2_acq(void *src);
+__u32 __ld4_acq(void *src);
+__u64 __ld8_acq(void *src);
+
+__u64 __fetchadd4_acq(__u32 *addend, const int increment);
+__u64 __fetchadd4_rel(__u32 *addend, const int increment);
+__u64 __fetchadd8_acq(__u64 *addend, const int increment);
+__u64 __fetchadd8_rel(__u64 *addend, const int increment);
+
+__u64 __getf_exp(double d);
+
+/* OS Related Itanium(R) Intrinsics */
+
+/* The names to use for whichReg and whichIndReg below come from
+   the include file asm/ia64regs.h */
+
+__u64 __getIndReg(const int whichIndReg, __s64 index);
+__u64 __getReg(const int whichReg);
+
+void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
+void __setReg(const int whichReg, __u64 value);
+
+void __mf(void);
+void __mfa(void);
+void __synci(void);
+void __itcd(__s64 pa);
+void __itci(__s64 pa);
+void __itrd(__s64 whichTransReg, __s64 pa);
+void __itri(__s64 whichTransReg, __s64 pa);
+void __ptce(__s64 va);
+void __ptcl(__s64 va, __s64 pagesz);
+void __ptcg(__s64 va, __s64 pagesz);
+void __ptcga(__s64 va, __s64 pagesz);
+void __ptri(__s64 va, __s64 pagesz);
+void __ptrd(__s64 va, __s64 pagesz);
+void __invala (void);
+void __invala_gr(const int whichGeneralReg /* 0-127 */ );
+void __invala_fr(const int whichFloatReg /* 0-127 */ );
+void __nop(const int);
+void __fc(__u64 *addr);
+void __sum(int mask);
+void __rum(int mask);
+void __ssm(int mask);
+void __rsm(int mask);
+__u64 __thash(__s64);
+__u64 __ttag(__s64);
+__s64 __tpa(__s64);
+
+/* Intrinsics for implementing get/put_user macros */
+void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
+void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
+
+/* This intrinsic does not generate code, it creates a barrier across which
+ * the compiler will not schedule data access instructions.
+ */
+void __memory_barrier(void);
+
+void __isrlz(void);
+void __dsrlz(void);
+
+__u64 _m64_mux1(__u64 a, const int n);
+__u64 __thash(__u64);
+
+/* Lock and Atomic Operation Related Intrinsics */
+__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
+__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
+__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
+__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
+
+__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
+__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
+
+__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
+__s64 _m64_shrp(__s64 a, __s64 b, const int count);
+__s64 _m64_popcnt(__s64 a);
+
+#define ia64_barrier()		__memory_barrier()
+
+#define ia64_stop()	/* Nothing: As of now stop bit is generated for each
+			 * intrinsic
+			 */
+
+#define ia64_getreg		__getReg
+#define ia64_setreg		__setReg
+
+#define ia64_hint(x)
+
+#define ia64_mux1_brcst	 0
+#define ia64_mux1_mix	 8
+#define ia64_mux1_shuf	 9
+#define ia64_mux1_alt	10
+#define ia64_mux1_rev	11
+
+#define ia64_mux1		_m64_mux1
+#define ia64_popcnt		_m64_popcnt
+#define ia64_getf_exp		__getf_exp
+#define ia64_shrp		_m64_shrp
+
+#define ia64_tpa		__tpa
+#define ia64_invala		__invala
+#define ia64_invala_gr		__invala_gr
+#define ia64_invala_fr		__invala_fr
+#define ia64_nop		__nop
+#define ia64_sum		__sum
+#define ia64_ssm		__ssm
+#define ia64_rum		__rum
+#define ia64_rsm		__rsm
+#define ia64_fc			__fc
+
+#define ia64_ldfs		__ldfs
+#define ia64_ldfd		__ldfd
+#define ia64_ldfe		__ldfe
+#define ia64_ldf8		__ldf8
+#define ia64_ldf_fill		__ldf_fill
+
+#define ia64_stfs		__stfs
+#define ia64_stfd		__stfd
+#define ia64_stfe		__stfe
+#define ia64_stf8		__stf8
+#define ia64_stf_spill		__stf_spill
+
+#define ia64_mf			__mf
+#define ia64_mfa		__mfa
+
+#define ia64_fetchadd4_acq	__fetchadd4_acq
+#define ia64_fetchadd4_rel	__fetchadd4_rel
+#define ia64_fetchadd8_acq	__fetchadd8_acq
+#define ia64_fetchadd8_rel	__fetchadd8_rel
+
+#define ia64_xchg1		_InterlockedExchange8
+#define ia64_xchg2		_InterlockedExchange16
+#define ia64_xchg4		_InterlockedExchange
+#define ia64_xchg8		_InterlockedExchange64
+
+#define ia64_cmpxchg1_rel	_InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq	_InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel	_InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq	_InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel	_InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq	_InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel	_InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq	_InterlockedCompareExchange64_acq
+
+#define __ia64_set_dbr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_DBR, index, val)
+#define ia64_set_ibr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_IBR, index, val)
+#define ia64_set_pkr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PKR, index, val)
+#define ia64_set_pmc(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PMC, index, val)
+#define ia64_set_pmd(index, val)	\
+		__setIndReg(_IA64_REG_INDR_PMD, index, val)
+#define ia64_set_rr(index, val)	\
+		__setIndReg(_IA64_REG_INDR_RR, index, val)
+
+#define ia64_get_cpuid(index)	__getIndReg(_IA64_REG_INDR_CPUID, index)
+#define __ia64_get_dbr(index)	__getIndReg(_IA64_REG_INDR_DBR, index)
+#define ia64_get_ibr(index)	__getIndReg(_IA64_REG_INDR_IBR, index)
+#define ia64_get_pkr(index)	__getIndReg(_IA64_REG_INDR_PKR, index)
+#define ia64_get_pmc(index)	__getIndReg(_IA64_REG_INDR_PMC, index)
+#define ia64_get_pmd(index)	__getIndReg(_IA64_REG_INDR_PMD, index)
+#define ia64_get_rr(index)	__getIndReg(_IA64_REG_INDR_RR, index)
+
+#define ia64_srlz_d		__dsrlz
+#define ia64_srlz_i		__isrlz
+
+#define ia64_st1_rel		__st1_rel
+#define ia64_st2_rel		__st2_rel
+#define ia64_st4_rel		__st4_rel
+#define ia64_st8_rel		__st8_rel
+
+#define ia64_ld1_acq		__ld1_acq
+#define ia64_ld2_acq		__ld2_acq
+#define ia64_ld4_acq		__ld4_acq
+#define ia64_ld8_acq		__ld8_acq
+
+#define ia64_sync_i		__synci
+#define ia64_thash		__thash
+#define ia64_ttag		__ttag
+#define ia64_itcd		__itcd
+#define ia64_itci		__itci
+#define ia64_itrd		__itrd
+#define ia64_itri		__itri
+#define ia64_ptce		__ptce
+#define ia64_ptcl		__ptcl
+#define ia64_ptcg		__ptcg
+#define ia64_ptcga		__ptcga
+#define ia64_ptri		__ptri
+#define ia64_ptrd		__ptrd
+#define ia64_dep_mi		_m64_dep_mi
+
+/* Values for lfhint in __lfetch and __lfetch_fault */
+
+#define ia64_lfhint_none	0
+#define ia64_lfhint_nt1		1
+#define ia64_lfhint_nt2		2
+#define ia64_lfhint_nta		3
+
+#define ia64_lfetch		__lfetch
+#define ia64_lfetch_excl	__lfetch_excl
+#define ia64_lfetch_fault	__lfetch_fault
+#define ia64_lfetch_fault_excl	__lfetch_fault_excl
+
+#define ia64_intrin_local_irq_restore(x)	\
+do {						\
+	if ((x) != 0) {				\
+		ia64_ssm(IA64_PSR_I);		\
+		ia64_srlz_d();			\
+	} else {				\
+		ia64_rsm(IA64_PSR_I);		\
+	}					\
+} while (0)
+
+#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff -Nru a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
--- a/include/asm-ia64/spinlock.h	Tue Sep  9 01:05:06 2003
+++ b/include/asm-ia64/spinlock.h	Tue Sep  9 01:05:06 2003
@@ -24,6 +24,7 @@
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
 #define spin_lock_init(x)			((x)->lock = 0)
 
+#ifdef ASM_SUPPORTED
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
@@ -85,6 +86,21 @@
 # endif /* CONFIG_MCKINLEY */
 #endif
 }
+#else /* !ASM_SUPPORTED */
+# define _raw_spin_lock(x)						\
+do {									\
+	__u32 *ia64_spinlock_ptr = (__u32 *) (x);			\
+	__u64 ia64_spinlock_val;					\
+	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
+	if (unlikely(ia64_spinlock_val)) {				\
+		do {							\
+			while (*ia64_spinlock_ptr)			\
+				ia64_barrier();				\
+			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
+		} while (ia64_spinlock_val);				\
+	}								\
+} while (0)
+#endif /* !ASM_SUPPORTED */
 
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
@@ -117,22 +133,19 @@
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
 } while (0)
 
+#ifdef ASM_SUPPORTED
 #define _raw_write_lock(rw)						\
 do {									\
 	__asm__ __volatile__ (						\
 		"mov ar.ccv = r0\n"					\
-		"dep r29 = -1, r0, 31, 1\n"				\
-		";;\n"							\
+		"dep r29 = -1, r0, 31, 1;;\n"				\
 		"1:\n"							\
-		"ld4 r2 = [%0]\n"					\
-		";;\n"							\
+		"ld4 r2 = [%0];;\n"					\
 		"cmp4.eq p0,p7 = r0,r2\n"				\
 		"(p7) br.cond.spnt.few 1b \n"				\
-		"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"			\
-		";;\n"							\
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"		\
 		"cmp4.eq p0,p7 = r0, r2\n"				\
-		"(p7) br.cond.spnt.few 1b\n"				\
-		";;\n"							\
+		"(p7) br.cond.spnt.few 1b;;\n"				\
 		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
 } while(0)
@@ -142,12 +155,34 @@
 									\
 	__asm__ __volatile__ (						\
 		"mov ar.ccv = r0\n"					\
-		"dep r29 = -1, r0, 31, 1\n"				\
-		";;\n"							\
+		"dep r29 = -1, r0, 31, 1;;\n"				\
 		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
 		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
 	(result == 0);							\
 })
+
+#else /* !ASM_SUPPORTED */
+
+#define _raw_write_lock(l)						\
+({									\
+	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
+	__u32 *ia64_write_lock_ptr = (__u32 *) (l);			\
+	do {								\
+		while (*ia64_write_lock_ptr)				\
+			ia64_barrier();					\
+		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
+	} while (ia64_val);						\
+})
+
+#define _raw_write_trylock(rw)						\
+({									\
+	__u64 ia64_val;							\
+	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
+	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
+	(ia64_val == 0);						\
+})
+
+#endif /* !ASM_SUPPORTED */
 
 #define _raw_write_unlock(x)						\
 ({									\
diff -Nru a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
--- a/include/asm-ia64/uaccess.h	Tue Sep  9 01:05:06 2003
+++ b/include/asm-ia64/uaccess.h	Tue Sep  9 01:05:06 2003
@@ -33,6 +33,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 
+#include <asm/intrinsics.h>
 #include <asm/pgtable.h>
 
 /*
@@ -86,6 +87,8 @@
 #define __put_user(x,ptr)	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
 #define __get_user(x,ptr)	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
 
+#ifdef ASM_SUPPORTED
+
 extern void __get_user_unknown (void);
 
 #define __get_user_nocheck(x,ptr,size)	\
@@ -216,6 +219,90 @@
 	"\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"			\
 	"[1:]"								\
 	: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
+
+#else /* !ASM_SUPPORTED */
+
+#define RELOC_TYPE	2	/* ip-rel */
+
+#define __put_user_xx(val, addr, size, err)				\
+	__st_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE, (unsigned long) (val)); \
+	(err) = ia64_getreg(_IA64_REG_R8);
+
+#define __get_user_xx(val, addr, size, err)				\
+	__ld_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE); \
+	(err) = ia64_getreg(_IA64_REG_R8);				\
+	(val) = ia64_getreg(_IA64_REG_R9);
+
+extern void __get_user_unknown (void);
+
+#define __get_user_nocheck(x, ptr, size)				\
+({									\
+	register long __gu_err = 0;					\
+	register long __gu_val = 0;					\
+	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
+	switch (size) {							\
+	case 1: case 2: case 4: case 8:					\
+		__get_user_xx(__gu_val, __gu_addr, size, __gu_err);	\
+		break;							\
+	default:							\
+		__get_user_unknown();					\
+		break;							\
+	}								\
+	(x) = (__typeof__(*(ptr))) __gu_val;				\
+	__gu_err;							\
+})
+
+#define __get_user_check(x,ptr,size,segment)				\
+({									\
+	register long __gu_err = -EFAULT;				\
+	register long __gu_val = 0;					\
+	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
+	if (__access_ok((long) __gu_addr, size, segment)) {		\
+		switch (size) {						\
+		case 1: case 2: case 4: case 8:				\
+			__get_user_xx(__gu_val, __gu_addr, size, __gu_err); \
+			break;						\
+		default:						\
+			__get_user_unknown(); break;			\
+		}							\
+	}								\
+	(x) = (__typeof__(*(ptr))) __gu_val;				\
+	__gu_err;							\
+})
+
+extern void __put_user_unknown (void);
+
+#define __put_user_nocheck(x, ptr, size)				\
+({									\
+	int __pu_err = 0;						\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	switch (size) {							\
+	case 1: case 2: case 4: case 8:					\
+		__put_user_xx(x, __pu_addr, size, __pu_err);		\
+		break;							\
+	default:							\
+		__put_user_unknown(); break;				\
+	}								\
+	__pu_err;							\
+})
+
+#define __put_user_check(x,ptr,size,segment)				\
+({									\
+	register long __pu_err = -EFAULT;				\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	if (__access_ok((long)__pu_addr, size, segment)) {		\
+		switch (size) {						\
+		case 1: case 2: case 4: case 8:				\
+			__put_user_xx(x, __pu_addr, size, __pu_err);	\
+			break;						\
+		default:						\
+			__put_user_unknown(); break;			\
+		}							\
+	}								\
+	__pu_err;							\
+})
+
+#endif /* !ASM_SUPPORTED */
 
 /*
  * Complex access routines
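
A note on the acpi.h change above: the ACPI global-lock word keeps a "pending"
flag in bit 0 and an "owned" flag in bit 1. acpi_acquire_global_lock() sets
the owned bit and, if the lock was already owned, the pending bit too; it
returns -1 when the lock was actually acquired and 0 when the caller must wait
to be signalled. acpi_release_global_lock() clears both bits and returns the
old pending bit, telling the caller whether a waiter must be signalled. The
user-space sketch below walks through the same algorithm; it is an
illustration only, not part of the patch, and GCC's __sync_val_compare_and_swap
builtin stands in for ia64_cmpxchg4_acq (note the different argument order).

	#include <stdio.h>

	/* Bit 0: a waiter is pending; bit 1: the lock is owned. */
	static unsigned int global_lock;

	static int acquire_global_lock(unsigned int *lock)
	{
		unsigned int old, new;
		do {
			old = *lock;
			/* Set "owned"; set "pending" iff it was already owned. */
			new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
		} while (__sync_val_compare_and_swap(lock, old, new) != old);
		return (new < 3) ? -1 : 0;	/* -1: acquired, 0: must wait */
	}

	static int release_global_lock(unsigned int *lock)
	{
		unsigned int old, new;
		do {
			old = *lock;
			new = old & ~0x3;	/* clear owned and pending */
		} while (__sync_val_compare_and_swap(lock, old, new) != old);
		return old & 0x1;	/* non-zero: signal the waiter */
	}

	int main(void)
	{
		printf("first acquire:  %d\n", acquire_global_lock(&global_lock));
		printf("second acquire: %d\n", acquire_global_lock(&global_lock));
		printf("release, pending=%d\n", release_global_lock(&global_lock));
		return 0;
	}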
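
Likewise, the !ASM_SUPPORTED _raw_spin_lock added to spinlock.h is a
test-and-test-and-set loop: one compare-and-exchange to grab the lock, then
spinning with plain loads until the word reads zero before retrying, so that
waiters do not bounce the cache line between CPUs. A minimal user-space sketch
under the same stand-in assumptions (hypothetical spin_lock/spin_unlock
helpers; a compiler barrier replaces ia64_barrier() and
__sync_val_compare_and_swap replaces ia64_cmpxchg4_acq):

	#include <stdio.h>

	#define barrier()	__asm__ __volatile__("" ::: "memory")

	static void spin_lock(volatile unsigned int *lock)
	{
		/* Fast path: a 0 -> 1 transition means we own the lock. */
		if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
			return;
		do {
			while (*lock)	/* wait using plain reads only */
				barrier();
		} while (__sync_val_compare_and_swap(lock, 0, 1) != 0);
	}

	static void spin_unlock(volatile unsigned int *lock)
	{
		barrier();	/* keep the critical section before the store */
		*lock = 0;
	}

	int main(void)
	{
		unsigned int lock = 0;
		spin_lock(&lock);
		printf("locked: %u\n", lock);
		spin_unlock(&lock);
		printf("unlocked: %u\n", lock);
		return 0;
	}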